Ocular Disease Intelligent Recognition (ODIR)¶

Using Convolutional Neural Network to Predict Ocular Diseases¶

Project by: 22BAI1213 Joshua S Raju, 22BRS1372 Md Rameez Haider, 22BAI1427 Shubham Yadav, 22BAI1425 Anton K Sam

This particular CNN model aims to predict three types of Ocular Diseases:

  1. Cataract

Dataset : https://www.kaggle.com/datasets/andrewmvd/ocular-disease-recognition-odir5k

Importing Dependencies¶

In [1]:
import numpy as np
import os

import cv2
import matplotlib.pylab as plt
import imghdr

import tensorflow as tf

Tensorflow GPU Management

In [2]:
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

Cleaning Image Data¶

In [3]:
data_dir = 'imgdata_v2'
In [4]:
img_exts = ['jpeg', 'jpg', 'png']
img_exts
Out[4]:
['jpeg', 'jpg', 'png']
In [5]:
# from PIL import ImageFile
# ImageFile.LOAD_TRUNCATED_IMAGES = True

for image_class in os.listdir(data_dir):
    for image in os.listdir(os.path.join(data_dir, image_class)):
        img_path = os.path.join(data_dir, image_class,image)
        img = plt.imread(img_path)
        ext = imghdr.what(img_path)
        try:
            if ext not in img_exts:
                os.remove(img_path)
                print('{} successfully removed'.format(img_path))
        except Error as e :
                print('Issue with {}'.format(img_path))

Image Enhancement Functions¶

Crop Function

In [6]:
lower = np.array([0,0,0])
higher = np.array([30,30,30])

def crop(image):
    img_copy = image.copy()
    mask  = cv2.inRange(image, lower, higher)
    inverted_mask = cv2.bitwise_not(mask)

    cont,_ = cv2.findContours(inverted_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(image, cont, -1, 255, 10)

    c = max(cont, key=cv2.contourArea)
    x,y,w,h = cv2.boundingRect(c)
    img_cropped = img_copy[y:y+h, x:x+w]

    print('{} successfully cropped'.format(img_path))
    
    return img_cropped

Contour Detection Function

In [7]:
lower = np.array([0,0,0])
higher = np.array([30,30,30])

def contour(image):
    mask  = cv2.inRange(image, lower, higher)
    inverted_mask = cv2.bitwise_not(mask)
    cont,_ = cv2.findContours(inverted_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    
    return len(cont)

Sharpening Function

In [8]:
kernel_sharpen = np.array([[-1,-1,-1],
                          [-1,9,-1],
                          [-1,-1,-1]])

def sharpen(image):
    img_sharpen = cv2.filter2D(image, -1, kernel_sharpen)

    return img_sharpen

Save Function

In [9]:
def save(path, image):
    plt.imsave(path, image)

PreProcessing Image¶

In [10]:
for image_class in os.listdir(data_dir):
    for image in os.listdir(os.path.join(data_dir, image_class)):
        img_path = os.path.join(data_dir, image_class,image)
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        
        if img[0][0][0] == 255:
            os.remove(img_path)
            print('{} White-background image successfully removed'.format(img_path))
        elif img.shape[0] != img.shape[1]:
            img_crop = crop(img)
            img_resize = cv2.resize(img_crop, (512,512))
            img_sharpen = sharpen(img_resize)
            save(img_path, img_sharpen)
        # elif contour(img)>1:
        #     os.remove(img_path)
        #     print('{} Non-transformable image removed'.format(img_path))
        else:
            img_resize = cv2.resize(img, (512,512))
            img_sharpen = sharpen(img_resize)
            save(img_path, img_sharpen)  
imgdata_v2\cataract\2174_right.jpg White-background image successfully removed
imgdata_v2\cataract\2175_left.jpg White-background image successfully removed
imgdata_v2\cataract\2176_left.jpg White-background image successfully removed
imgdata_v2\cataract\2177_right.jpg White-background image successfully removed
imgdata_v2\cataract\2178_right.jpg White-background image successfully removed
imgdata_v2\cataract\2179_right.jpg White-background image successfully removed
imgdata_v2\cataract\2180_left.jpg White-background image successfully removed
imgdata_v2\cataract\2180_right.jpg White-background image successfully removed
imgdata_v2\cataract\2181_left.jpg White-background image successfully removed
imgdata_v2\cataract\2181_right.jpg White-background image successfully removed
imgdata_v2\cataract\2182_left.jpg White-background image successfully removed
imgdata_v2\cataract\2182_right.jpg White-background image successfully removed
imgdata_v2\cataract\cataract_001.png successfully cropped
imgdata_v2\cataract\cataract_002.png successfully cropped
imgdata_v2\cataract\cataract_003.png successfully cropped
imgdata_v2\cataract\cataract_004.png successfully cropped
imgdata_v2\cataract\cataract_005.png successfully cropped
imgdata_v2\cataract\cataract_006.png successfully cropped
imgdata_v2\cataract\cataract_007.png successfully cropped
imgdata_v2\cataract\cataract_008.png successfully cropped
imgdata_v2\cataract\cataract_009.png successfully cropped
imgdata_v2\cataract\cataract_010.png successfully cropped
imgdata_v2\cataract\cataract_011.png successfully cropped
imgdata_v2\cataract\cataract_012.png successfully cropped
imgdata_v2\cataract\cataract_013.png successfully cropped
imgdata_v2\cataract\cataract_014.png successfully cropped
imgdata_v2\cataract\cataract_015.png successfully cropped
imgdata_v2\cataract\cataract_016.png successfully cropped
imgdata_v2\cataract\cataract_017.png successfully cropped
imgdata_v2\cataract\cataract_018.png successfully cropped
imgdata_v2\cataract\cataract_019.png successfully cropped
imgdata_v2\cataract\cataract_020.png successfully cropped
imgdata_v2\cataract\cataract_021.png successfully cropped
imgdata_v2\cataract\cataract_022.png successfully cropped
imgdata_v2\cataract\cataract_023.png successfully cropped
imgdata_v2\cataract\cataract_024.png successfully cropped
imgdata_v2\cataract\cataract_025.png successfully cropped
imgdata_v2\cataract\cataract_026.png successfully cropped
imgdata_v2\cataract\cataract_027.png successfully cropped
imgdata_v2\cataract\cataract_028.png successfully cropped
imgdata_v2\cataract\cataract_029.png successfully cropped
imgdata_v2\cataract\cataract_030.png successfully cropped
imgdata_v2\cataract\cataract_031.png successfully cropped
imgdata_v2\cataract\cataract_032.png successfully cropped
imgdata_v2\cataract\cataract_033.png successfully cropped
imgdata_v2\cataract\cataract_034.png successfully cropped
imgdata_v2\cataract\cataract_035.png successfully cropped
imgdata_v2\cataract\cataract_036.png successfully cropped
imgdata_v2\cataract\cataract_037.png successfully cropped
imgdata_v2\cataract\cataract_038.png successfully cropped
imgdata_v2\cataract\cataract_039.png successfully cropped
imgdata_v2\cataract\cataract_040.png successfully cropped
imgdata_v2\cataract\cataract_041.png successfully cropped
imgdata_v2\cataract\cataract_042.png successfully cropped
imgdata_v2\cataract\cataract_043.png successfully cropped
imgdata_v2\cataract\cataract_044.png successfully cropped
imgdata_v2\cataract\cataract_045.png successfully cropped
imgdata_v2\cataract\cataract_046.png successfully cropped
imgdata_v2\cataract\cataract_047.png successfully cropped
imgdata_v2\cataract\cataract_048.png successfully cropped
imgdata_v2\cataract\cataract_049.png successfully cropped
imgdata_v2\cataract\cataract_050.png successfully cropped
imgdata_v2\cataract\cataract_051.png successfully cropped
imgdata_v2\cataract\cataract_052.png successfully cropped
imgdata_v2\cataract\cataract_053.png successfully cropped
imgdata_v2\cataract\cataract_054.png successfully cropped
imgdata_v2\cataract\cataract_055.png successfully cropped
imgdata_v2\cataract\cataract_056.png successfully cropped
imgdata_v2\cataract\cataract_057.png successfully cropped
imgdata_v2\cataract\cataract_058.png successfully cropped
imgdata_v2\cataract\cataract_059.png successfully cropped
imgdata_v2\cataract\cataract_060.png successfully cropped
imgdata_v2\cataract\cataract_061.png successfully cropped
imgdata_v2\cataract\cataract_062.png successfully cropped
imgdata_v2\cataract\cataract_063.png successfully cropped
imgdata_v2\cataract\cataract_064.png successfully cropped
imgdata_v2\cataract\cataract_065.png successfully cropped
imgdata_v2\cataract\cataract_066.png successfully cropped
imgdata_v2\cataract\cataract_067.png successfully cropped
imgdata_v2\cataract\cataract_068.png successfully cropped
imgdata_v2\cataract\cataract_069.png successfully cropped
imgdata_v2\cataract\cataract_070.png successfully cropped
imgdata_v2\cataract\cataract_071.png successfully cropped
imgdata_v2\cataract\cataract_072.png successfully cropped
imgdata_v2\cataract\cataract_073.png successfully cropped
imgdata_v2\cataract\cataract_074.png successfully cropped
imgdata_v2\cataract\cataract_075.png successfully cropped
imgdata_v2\cataract\cataract_076.png successfully cropped
imgdata_v2\cataract\cataract_077.png successfully cropped
imgdata_v2\cataract\cataract_078.png successfully cropped
imgdata_v2\cataract\cataract_079.png successfully cropped
imgdata_v2\cataract\cataract_080.png successfully cropped
imgdata_v2\cataract\cataract_081.png successfully cropped
imgdata_v2\cataract\cataract_082.png successfully cropped
imgdata_v2\cataract\cataract_083.png successfully cropped
imgdata_v2\cataract\cataract_084.png successfully cropped
imgdata_v2\cataract\cataract_085.png successfully cropped
imgdata_v2\cataract\cataract_086.png successfully cropped
imgdata_v2\cataract\cataract_087.png successfully cropped
imgdata_v2\cataract\cataract_088.png successfully cropped
imgdata_v2\cataract\cataract_089.png successfully cropped
imgdata_v2\cataract\cataract_090.png successfully cropped
imgdata_v2\cataract\cataract_091.png successfully cropped
imgdata_v2\cataract\cataract_092.png successfully cropped
imgdata_v2\cataract\cataract_093.png successfully cropped
imgdata_v2\cataract\cataract_094.png successfully cropped
imgdata_v2\cataract\cataract_095.png successfully cropped
imgdata_v2\cataract\cataract_096.png successfully cropped
imgdata_v2\cataract\cataract_097.png successfully cropped
imgdata_v2\cataract\cataract_098.png successfully cropped
imgdata_v2\cataract\cataract_099.png successfully cropped
imgdata_v2\cataract\cataract_100.png successfully cropped
imgdata_v2\cataract\_130_3561448.jpg White-background image successfully removed
imgdata_v2\cataract\_130_7837321.jpg White-background image successfully removed
imgdata_v2\cataract\_131_7587386.jpg White-background image successfully removed
imgdata_v2\cataract\_131_863673.jpg White-background image successfully removed
imgdata_v2\cataract\_132_7102134.jpg White-background image successfully removed
imgdata_v2\cataract\_133_1300923.jpg White-background image successfully removed
imgdata_v2\cataract\_135_5639464.jpg White-background image successfully removed
imgdata_v2\cataract\_135_7789155.jpg White-background image successfully removed
imgdata_v2\cataract\_137_7856304.jpg White-background image successfully removed
imgdata_v2\cataract\_138_1949117.jpg White-background image successfully removed
imgdata_v2\cataract\_139_583821.jpg White-background image successfully removed
imgdata_v2\cataract\_140_1534199.jpg White-background image successfully removed
imgdata_v2\cataract\_141_3457945.jpg White-background image successfully removed
imgdata_v2\cataract\_141_9740629.jpg White-background image successfully removed
imgdata_v2\cataract\_142_7918627.jpg White-background image successfully removed
imgdata_v2\cataract\_143_1247400.jpg White-background image successfully removed
imgdata_v2\cataract\_143_9392801.jpg White-background image successfully removed
imgdata_v2\normal\2957_left.jpg White-background image successfully removed
imgdata_v2\normal\2957_right.jpg White-background image successfully removed

Creating Image Dataset¶

In [11]:
dataset = tf.keras.utils.image_dataset_from_directory(data_dir, image_size=(256, 256))
Found 2081 files belonging to 2 classes.
In [12]:
batch = dataset.as_numpy_iterator().next()
len(batch)
Out[12]:
2
In [13]:
classes = ['cataract', 'normal']
classes
Out[13]:
['cataract', 'normal']
In [14]:
fig, axs = plt.subplots(5,5, figsize=(20,20))
for idx, img in enumerate(batch[0][:25]):
    row = idx//5
    col = idx%5

    axs[row, col].imshow(img.astype(int))
    axs[row, col].axis('off')
    axs[row, col].set_title(classes[batch[1][idx]])
No description has been provided for this image

Image Normalization¶

In [18]:
batch[0][0].shape, batch[0][0].max(), batch[0][0].min()
Out[18]:
((256, 256, 3), 255.0, 0.0)
In [22]:
data = dataset.map(lambda x, y: (x/255, y))
In [23]:
data.as_numpy_iterator().next()[0]
Out[23]:
array([[[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]],


       [[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]],


       [[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]],


       ...,


       [[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]],


       [[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]],


       [[[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        ...,

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]],

        [[0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.],
         ...,
         [0., 0., 0.],
         [0., 0., 0.],
         [0., 0., 0.]]]], dtype=float32)
In [24]:
data.as_numpy_iterator().next()[0].max(), data.as_numpy_iterator().next()[0].min()
Out[24]:
(1.0, 0.0)
In [25]:
len(data)
Out[25]:
66

Splitting Dataset¶

In [26]:
training_size = int(len(data)*0.7)
val_size = int(len(data)*0.2) + 1
test_size = int(len(data)*0.1)

training_size + val_size + test_size
Out[26]:
66
In [27]:
training_data = data.take(training_size)
val_data = data.skip(training_size).take(val_size)
test_data =  data.skip(training_size + val_size).take(test_size)

Creating CNN Model¶

In [28]:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
In [29]:
model = Sequential()
In [30]:
data.as_numpy_iterator().next()[0][0].shape
Out[30]:
(256, 256, 3)
In [31]:
model.add(Conv2D(32, (3,3), 1, activation = 'relu', input_shape = (256, 256, 3)))
model.add(MaxPooling2D())
model.add(Dropout(0.5))

model.add(Conv2D(32, (3,3), 1, activation = 'relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))

model.add(Conv2D(16, (3,3), 1, activation = 'relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))

model.add(Conv2D(16, (3,3), 1, activation = 'relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))


model.add(Flatten())

model.add(Dense(256, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
In [32]:
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
In [33]:
model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 254, 254, 32)      896       
                                                                 
 max_pooling2d (MaxPooling2D  (None, 127, 127, 32)     0         
 )                                                               
                                                                 
 dropout (Dropout)           (None, 127, 127, 32)      0         
                                                                 
 conv2d_1 (Conv2D)           (None, 125, 125, 32)      9248      
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 62, 62, 32)       0         
 2D)                                                             
                                                                 
 dropout_1 (Dropout)         (None, 62, 62, 32)        0         
                                                                 
 conv2d_2 (Conv2D)           (None, 60, 60, 16)        4624      
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 30, 30, 16)       0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 30, 30, 16)        0         
                                                                 
 conv2d_3 (Conv2D)           (None, 28, 28, 16)        2320      
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 14, 14, 16)       0         
 2D)                                                             
                                                                 
 dropout_3 (Dropout)         (None, 14, 14, 16)        0         
                                                                 
 flatten (Flatten)           (None, 3136)              0         
                                                                 
 dense (Dense)               (None, 256)               803072    
                                                                 
 dropout_4 (Dropout)         (None, 256)               0         
                                                                 
 dense_1 (Dense)             (None, 1)                 257       
                                                                 
=================================================================
Total params: 820,417
Trainable params: 820,417
Non-trainable params: 0
_________________________________________________________________

Training The Model¶

In [34]:
log_dir = 'LogDir'
In [35]:
tensorflowboard_callback = tf.keras.callbacks.TensorBoard(log_dir)
In [36]:
hist = model.fit(training_data, epochs = 30, validation_data = val_data, callbacks=[tensorflowboard_callback] )
Epoch 1/30
46/46 [==============================] - 22s 160ms/step - loss: 0.6922 - accuracy: 0.5836 - val_loss: 0.6691 - val_accuracy: 0.6875
Epoch 2/30
46/46 [==============================] - 4s 75ms/step - loss: 0.5489 - accuracy: 0.7351 - val_loss: 0.6334 - val_accuracy: 0.6875
Epoch 3/30
46/46 [==============================] - 3s 72ms/step - loss: 0.4670 - accuracy: 0.8193 - val_loss: 0.6079 - val_accuracy: 0.6808
Epoch 4/30
46/46 [==============================] - 3s 72ms/step - loss: 0.3716 - accuracy: 0.8485 - val_loss: 0.5958 - val_accuracy: 0.6763
Epoch 5/30
46/46 [==============================] - 3s 72ms/step - loss: 0.3724 - accuracy: 0.8458 - val_loss: 0.5323 - val_accuracy: 0.7924
Epoch 6/30
46/46 [==============================] - 3s 71ms/step - loss: 0.3442 - accuracy: 0.8526 - val_loss: 0.6219 - val_accuracy: 0.6205
Epoch 7/30
46/46 [==============================] - 3s 72ms/step - loss: 0.3577 - accuracy: 0.8499 - val_loss: 0.5109 - val_accuracy: 0.8036
Epoch 8/30
46/46 [==============================] - 3s 70ms/step - loss: 0.3214 - accuracy: 0.8607 - val_loss: 0.5609 - val_accuracy: 0.7076
Epoch 9/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2993 - accuracy: 0.8730 - val_loss: 0.5308 - val_accuracy: 0.7790
Epoch 10/30
46/46 [==============================] - 3s 71ms/step - loss: 0.3091 - accuracy: 0.8723 - val_loss: 0.5109 - val_accuracy: 0.8393
Epoch 11/30
46/46 [==============================] - 3s 73ms/step - loss: 0.2916 - accuracy: 0.8696 - val_loss: 0.5028 - val_accuracy: 0.8594
Epoch 12/30
46/46 [==============================] - 3s 73ms/step - loss: 0.3143 - accuracy: 0.8702 - val_loss: 0.5019 - val_accuracy: 0.8795
Epoch 13/30
46/46 [==============================] - 4s 74ms/step - loss: 0.2926 - accuracy: 0.8709 - val_loss: 0.4713 - val_accuracy: 0.8571
Epoch 14/30
46/46 [==============================] - 4s 73ms/step - loss: 0.2984 - accuracy: 0.8716 - val_loss: 0.4848 - val_accuracy: 0.8482
Epoch 15/30
46/46 [==============================] - 4s 76ms/step - loss: 0.2680 - accuracy: 0.8852 - val_loss: 0.4739 - val_accuracy: 0.7924
Epoch 16/30
46/46 [==============================] - 4s 75ms/step - loss: 0.2557 - accuracy: 0.8872 - val_loss: 0.4345 - val_accuracy: 0.8951
Epoch 17/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2440 - accuracy: 0.9008 - val_loss: 0.3874 - val_accuracy: 0.8862
Epoch 18/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2664 - accuracy: 0.8832 - val_loss: 0.4253 - val_accuracy: 0.8817
Epoch 19/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2497 - accuracy: 0.9015 - val_loss: 0.3587 - val_accuracy: 0.8683
Epoch 20/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2500 - accuracy: 0.8961 - val_loss: 0.3925 - val_accuracy: 0.8772
Epoch 21/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2354 - accuracy: 0.8981 - val_loss: 0.3920 - val_accuracy: 0.8750
Epoch 22/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2742 - accuracy: 0.8920 - val_loss: 0.3616 - val_accuracy: 0.8527
Epoch 23/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2154 - accuracy: 0.9124 - val_loss: 0.3166 - val_accuracy: 0.9085
Epoch 24/30
46/46 [==============================] - 3s 69ms/step - loss: 0.2634 - accuracy: 0.8940 - val_loss: 0.3142 - val_accuracy: 0.9129
Epoch 25/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2262 - accuracy: 0.9056 - val_loss: 0.2920 - val_accuracy: 0.9018
Epoch 26/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2379 - accuracy: 0.9008 - val_loss: 0.3016 - val_accuracy: 0.9196
Epoch 27/30
46/46 [==============================] - 3s 70ms/step - loss: 0.2017 - accuracy: 0.9124 - val_loss: 0.3204 - val_accuracy: 0.8638
Epoch 28/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2097 - accuracy: 0.9035 - val_loss: 0.2985 - val_accuracy: 0.8616
Epoch 29/30
46/46 [==============================] - 3s 69ms/step - loss: 0.2198 - accuracy: 0.9008 - val_loss: 0.3055 - val_accuracy: 0.8906
Epoch 30/30
46/46 [==============================] - 3s 71ms/step - loss: 0.2106 - accuracy: 0.9049 - val_loss: 0.2854 - val_accuracy: 0.8996
In [37]:
hist.history
Out[37]:
{'loss': [0.6921729445457458,
  0.5488530993461609,
  0.46699216961860657,
  0.3716110587120056,
  0.3724481761455536,
  0.34417495131492615,
  0.35769227147102356,
  0.3214152753353119,
  0.29930663108825684,
  0.30912306904792786,
  0.2915944755077362,
  0.3143465518951416,
  0.29258355498313904,
  0.29837557673454285,
  0.26796436309814453,
  0.25568461418151855,
  0.24402566254138947,
  0.26636794209480286,
  0.24967527389526367,
  0.2500332295894623,
  0.2353822886943817,
  0.27423399686813354,
  0.21541178226470947,
  0.26338180899620056,
  0.2262057065963745,
  0.23785282671451569,
  0.20169278979301453,
  0.20967566967010498,
  0.21977007389068604,
  0.21062934398651123],
 'accuracy': [0.5835598111152649,
  0.7350543737411499,
  0.8192934989929199,
  0.848505437374115,
  0.8457880616188049,
  0.8525815010070801,
  0.84986412525177,
  0.860733687877655,
  0.8729619383811951,
  0.8722826242446899,
  0.8695651888847351,
  0.870244562625885,
  0.8709239363670349,
  0.87160325050354,
  0.8851901888847351,
  0.88722825050354,
  0.9008151888847351,
  0.883152186870575,
  0.901494562625885,
  0.8960598111152649,
  0.898097813129425,
  0.891983687877655,
  0.91236412525177,
  0.89402174949646,
  0.9055706262588501,
  0.9008151888847351,
  0.91236412525177,
  0.9035326242446899,
  0.9008151888847351,
  0.904891312122345],
 'val_loss': [0.6690706610679626,
  0.633405864238739,
  0.6078591346740723,
  0.5957918167114258,
  0.5323455929756165,
  0.6218687891960144,
  0.5108678936958313,
  0.5608671307563782,
  0.5307727456092834,
  0.5109450221061707,
  0.5028263926506042,
  0.5019455552101135,
  0.4712865352630615,
  0.4847570061683655,
  0.47389551997184753,
  0.4344886243343353,
  0.3873830735683441,
  0.425285279750824,
  0.3587368428707123,
  0.3924557864665985,
  0.39196181297302246,
  0.36155134439468384,
  0.3166485130786896,
  0.3141867518424988,
  0.291970431804657,
  0.30161529779434204,
  0.3203575313091278,
  0.29848021268844604,
  0.3054675757884979,
  0.2854112982749939],
 'val_accuracy': [0.6875,
  0.6875,
  0.6808035969734192,
  0.6763392686843872,
  0.7924107313156128,
  0.6205357313156128,
  0.8035714030265808,
  0.7075892686843872,
  0.7790178656578064,
  0.8392857313156128,
  0.859375,
  0.8794642686843872,
  0.8571428656578064,
  0.8482142686843872,
  0.7924107313156128,
  0.8950892686843872,
  0.8861607313156128,
  0.8816964030265808,
  0.8683035969734192,
  0.8772321343421936,
  0.875,
  0.8526785969734192,
  0.9084821343421936,
  0.9129464030265808,
  0.9017857313156128,
  0.9196428656578064,
  0.8638392686843872,
  0.8616071343421936,
  0.890625,
  0.8995535969734192]}

Model Performance¶

In [39]:
fig, axs = plt.subplots(2, 1, figsize=(10,10))
axs[0].plot(hist.history['loss'], color='red', label='Loss')
axs[0].plot(hist.history['val_loss'], color='orange', label='Validation Loss')
axs[0].legend(loc='upper right')

axs[1].plot(hist.history['accuracy'], 
             color='green', 
             label='Accuracy')
axs[1].plot(hist.history['val_accuracy'], 
             color='teal', 
             label='Validation Accuracy')
axs[1].legend(loc='upper left')
Out[39]:
<matplotlib.legend.Legend at 0x264a196f670>
No description has been provided for this image

Evaluating Model¶

In [42]:
from tensorflow.keras.metrics import Precision, Recall, BinaryAccuracy, SpecificityAtSensitivity

pre = Precision()
acc = BinaryAccuracy()
rec = Recall()
spe = SpecificityAtSensitivity(0.5)
In [43]:
for batch in test_data.as_numpy_iterator():
    X, y = batch
    y_pred = model.predict(X)
    pre.update_state(y, y_pred)
    rec.update_state(y, y_pred)
    acc.update_state(y, y_pred)
    spe.update_state(y, y_pred)
1/1 [==============================] - 0s 94ms/step
1/1 [==============================] - 0s 59ms/step
1/1 [==============================] - 0s 38ms/step
1/1 [==============================] - 0s 35ms/step
1/1 [==============================] - 0s 37ms/step
1/1 [==============================] - 0s 228ms/step
In [44]:
precision = pre.result().numpy()
recall = rec.result().numpy()
f1 = 2 * (precision*recall) / (precision+recall)

print('Accuracy\t:', acc.result().numpy())
print('Precision\t:', precision)
print('Recall\t\t:', recall)
print('Specificity\t:', spe.result().numpy())
print('F1\t\t:', f1)
Accuracy	: 0.91925466
Precision	: 0.8961039
Recall		: 0.9324324
Specificity	: 1.0
F1		: 0.9139073050603324

Exporting Model¶

In [45]:
model.save(os.path.join('models', 'ODIR_NoDataAug.h5'))
In [46]:
from tensorflow.keras.models import load_model

model = load_model(os.path.join('models', 'ODIR_NoDataAug.h5'))
model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d (Conv2D)             (None, 254, 254, 32)      896       
                                                                 
 max_pooling2d (MaxPooling2D  (None, 127, 127, 32)     0         
 )                                                               
                                                                 
 dropout (Dropout)           (None, 127, 127, 32)      0         
                                                                 
 conv2d_1 (Conv2D)           (None, 125, 125, 32)      9248      
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 62, 62, 32)       0         
 2D)                                                             
                                                                 
 dropout_1 (Dropout)         (None, 62, 62, 32)        0         
                                                                 
 conv2d_2 (Conv2D)           (None, 60, 60, 16)        4624      
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 30, 30, 16)       0         
 2D)                                                             
                                                                 
 dropout_2 (Dropout)         (None, 30, 30, 16)        0         
                                                                 
 conv2d_3 (Conv2D)           (None, 28, 28, 16)        2320      
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 14, 14, 16)       0         
 2D)                                                             
                                                                 
 dropout_3 (Dropout)         (None, 14, 14, 16)        0         
                                                                 
 flatten (Flatten)           (None, 3136)              0         
                                                                 
 dense (Dense)               (None, 256)               803072    
                                                                 
 dropout_4 (Dropout)         (None, 256)               0         
                                                                 
 dense_1 (Dense)             (None, 1)                 257       
                                                                 
=================================================================
Total params: 820,417
Trainable params: 820,417
Non-trainable params: 0
_________________________________________________________________